Classification Metrics

| | Actual Positive (AP) | Actual Negative (AN) | Prevalence = AP / (AP + AN) | Accuracy = (TP + TN) / (AP + AN) |
|---|---|---|---|---|
| Predicted Positive (PP) | True Positive (TP) | False Positive (FP) | Precision, Positive Predictive Value (PPV) = TP / PP | False Discovery Rate (FDR) = FP / PP |
| Predicted Negative (PN) | False Negative (FN) | True Negative (TN) | False Omission Rate (FOR) = FN / PN | Negative Predictive Value (NPV) = TN / PN |
| | True Positive Rate (TPR), Recall, Sensitivity, Probability of Detection = TP / AP | False Positive Rate (FPR), Fall-out, Probability of False Alarm = FP / AN | Positive Likelihood Ratio (LR+) = TPR / FPR | Diagnostic Odds Ratio (DOR) = LR+ / LR-; F1 Score = 2TP / (2TP + FP + FN) |
| | False Negative Rate (FNR), Miss Rate = FN / AP | True Negative Rate (TNR), Specificity (SPC), Selectivity = TN / AN | Negative Likelihood Ratio (LR-) = FNR / TNR | |
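
As a quick illustration, the sketch below derives several of these quantities directly from a 2x2 confusion matrix; the toy labels here are made up purely for the example.

import numpy as np
from sklearn.metrics import confusion_matrix

# Toy binary labels (hypothetical values, for illustration only)
y_true = np.array([1, 0, 1, 1, 0, 0, 1, 0])
y_pred = np.array([1, 0, 0, 1, 0, 1, 1, 0])

# For binary labels (0, 1), confusion_matrix returns [[TN, FP], [FN, TP]]
tn, fp, fn, tp = confusion_matrix(y_true, y_pred).ravel()

precision = tp / (tp + fp)               # PPV
recall = tp / (tp + fn)                  # TPR, Sensitivity
fpr = fp / (fp + tn)                     # Fall-out
f1 = 2 * tp / (2 * tp + fp + fn)         # F1 Score
print(precision, recall, fpr, f1)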
from sklearn import datasets # Dataset
from sklearn.neighbors import KNeighborsClassifier
from sklearn.metrics import confusion_matrix, f1_score # Metrics
# Loading the dataset
X, y_true = datasets.make_moons(n_samples=500, noise=0.3, random_state=10) # Generate 500 samples
# Fitting using K-Nearest Neighbors Classifier
knnc = KNeighborsClassifier(n_neighbors=2).fit(X, y_true)
y_pred = knnc.predict(X)
# Print evaluation result using the metrics
print(confusion_matrix(y_true, y_pred))
print(f1_score(y_true, y_pred))
# Plot the confusion matrix (plot_confusion_matrix has been removed from
# scikit-learn; ConfusionMatrixDisplay is its replacement)
from sklearn.metrics import ConfusionMatrixDisplay
ConfusionMatrixDisplay.from_predictions(y_true, y_pred)
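
Note that the snippet above evaluates on the same data the model was fit on, which overstates performance (with n_neighbors=2, each training point is nearly its own nearest neighbor). A more honest sketch, assuming a held-out split:

from sklearn import datasets
from sklearn.model_selection import train_test_split
from sklearn.neighbors import KNeighborsClassifier
from sklearn.metrics import confusion_matrix, f1_score

X, y = datasets.make_moons(n_samples=500, noise=0.3, random_state=10)
# Hold out 25% of the samples for evaluation
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.25, random_state=10)

knnc = KNeighborsClassifier(n_neighbors=2).fit(X_train, y_train)
y_pred = knnc.predict(X_test)
print(confusion_matrix(y_test, y_pred))
print(f1_score(y_test, y_pred))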
from sklearn import datasets # Dataset
from sklearn.neighbors import KNeighborsClassifier
from sklearn.metrics import confusion_matrix, roc_auc_score, f1_score # Metrics
# Loading the dataset
X, y_true = datasets.make_blobs(n_samples=500, centers=10, n_features=2, random_state=0) # Generate 500 samples from 10 classes
# Fitting using K-Nearest Neighbors Classifier
knnc = KNeighborsClassifier(n_neighbors=2).fit(X, y_true)
y_pred = knnc.predict(X)
# Print evaluation results; multiclass F1 needs an averaging scheme, and
# multiclass ROC AUC is computed one-vs-rest from class probabilities
print(confusion_matrix(y_true, y_pred))
print(f1_score(y_true, y_pred, average='macro'))
print(roc_auc_score(y_true, knnc.predict_proba(X), multi_class='ovr'))
from sklearn.metrics import ConfusionMatrixDisplay
ConfusionMatrixDisplay.from_predictions(y_true, y_pred, cmap='plasma')
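
For per-class detail in the multiclass case, classification_report summarizes precision, recall, and F1 for each class; a minimal sketch continuing from the variables above:

from sklearn.metrics import classification_report
# Per-class precision, recall, F1, and support, plus macro/weighted averages
print(classification_report(y_true, y_pred))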